bitkeeper revision 1.749.1.1 (403cd19e9cL26IazEdGTvx0tHxbYqw)
authorkaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Wed, 25 Feb 2004 16:47:26 +0000 (16:47 +0000)
committerkaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Wed, 25 Feb 2004 16:47:26 +0000 (16:47 +0000)
event_channel.h:
  new file
sched.h, hypervisor-if.h, event_channel.c, domain.c, apic.c:
  Rewritten event-channel code.

.rootkeys
xen/arch/i386/apic.c
xen/common/domain.c
xen/common/event_channel.c
xen/include/hypervisor-ifs/event_channel.h [new file with mode: 0644]
xen/include/hypervisor-ifs/hypervisor-if.h
xen/include/xeno/sched.h

index 788428d145f67fa875a1970c2ca16f214c55254e..9e80ed605d2ef13b9d82e303bfe43d6b6ed1bcb1 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
 3ddb79c2YTaZwOqWin9-QNgHge5RVw xen/include/hypervisor-ifs/block.h
 3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h
 3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/hypervisor-ifs/dom_mem_ops.h
+403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h
 3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h
 3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h
 3ddb79c2oRPrzClk3zbTkRHlpumzKA xen/include/hypervisor-ifs/network.h
index 469890b488d4d74ff9ee36a8e305d5dc1e387f83..e622cc410435f9e8c2a32b75d70a8f25efb101f7 100644 (file)
@@ -659,7 +659,6 @@ void __init setup_APIC_clocks (void)
  */
 int reprogram_ac_timer(s_time_t timeout)
 {
-    int         cpu = smp_processor_id();
     s_time_t    now;
     s_time_t    expire;
     u64         apic_tmict;
@@ -669,7 +668,8 @@ int reprogram_ac_timer(s_time_t timeout)
      * cause an immediate interrupt). At least this is guaranteed to hold it
      * off for ages (esp. since the clock ticks on bus clock, not cpu clock!).
      */
-    if (timeout == 0) {
+    if ( timeout == 0 )
+    {
         apic_tmict = 0xffffffff;
         goto reprogram;
     }
@@ -677,10 +677,12 @@ int reprogram_ac_timer(s_time_t timeout)
     now = NOW();
     expire = timeout - now; /* value from now */
 
-    if (expire <= 0) {
+    if ( expire <= 0 )
+    {
         Dprintk("APICT[%02d] Timeout in the past 0x%08X%08X > 0x%08X%08X\n", 
-                cpu, (u32)(now>>32), (u32)now, (u32)(timeout>>32),(u32)timeout);
-        return 0;       /* timeout value in the past */
+                smp_processor_id(), (u32)(now>>32), 
+                (u32)now, (u32)(timeout>>32),(u32)timeout);
+        return 0;
     }
 
     /*
@@ -693,12 +695,15 @@ int reprogram_ac_timer(s_time_t timeout)
     /* conversion to bus units */
     apic_tmict = (((u64)bus_scale) * expire)>>18;
 
-    if (apic_tmict >= 0xffffffff) {
-        Dprintk("APICT[%02d] Timeout value too large\n", cpu);
+    if ( apic_tmict >= 0xffffffff )
+    {
+        Dprintk("APICT[%02d] Timeout value too large\n", smp_processor_id());
         apic_tmict = 0xffffffff;
     }
-    if (apic_tmict == 0) {
-        Dprintk("APICT[%02d] timeout value too small\n", cpu);
+
+    if ( apic_tmict == 0 )
+    {
+        Dprintk("APICT[%02d] timeout value too small\n", smp_processor_id());
         return 0;
     }
 
index 85a3c8ae31a806261006185ab780be434f5fe6e1..597429b3575f026f7ede0321330b453e201cc72e 100644 (file)
@@ -127,6 +127,8 @@ void kill_domain_with_errmsg(const char *err)
 
 void __kill_domain(struct task_struct *p)
 {
+    extern void destroy_event_channels(struct task_struct *);
+
     int i;
     struct task_struct **pp;
     unsigned long flags;
@@ -149,6 +151,8 @@ void __kill_domain(struct task_struct *p)
     for ( i = 0; i < MAX_DOMAIN_VIFS; i++ )
         unlink_net_vif(p->net_vif_list[i]);
 
+    destroy_event_channels(p);
+
     /*
      * Note this means that find_domain_by_id may fail, even when the caller
      * holds a reference to the domain being queried. Take care!
@@ -467,8 +471,6 @@ unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes)
 /* Release resources belonging to task @p. */
 void release_task(struct task_struct *p)
 {
-    extern void destroy_event_channels(struct task_struct *);
-
     ASSERT(p->state == TASK_DYING);
     ASSERT(!p->has_cpu);
 
@@ -481,7 +483,6 @@ void release_task(struct task_struct *p)
     destroy_blkdev_info(p);
 
     /* Free all memory associated with this domain. */
-    destroy_event_channels(p);
     free_page((unsigned long)p->mm.perdomain_pt);
     UNSHARE_PFN(virt_to_page(p->shared_info));
     free_all_dom_mem(p);
index 3674f560190328dcdbc54e7a095f46cd52a99ea5..ad668e8684ef2cbf022a7c9f81e0890af65601d3 100644 (file)
@@ -3,7 +3,7 @@
  * 
  * Event channels between domains.
  * 
- * Copyright (c) 2003, K A Fraser.
+ * Copyright (c) 2003-2004, K A Fraser.
  * 
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
 #include <xeno/sched.h>
 #include <xeno/event.h>
 
+#include <hypervisor-ifs/hypervisor-if.h>
+#include <hypervisor-ifs/event_channel.h>
+
 #define MAX_EVENT_CHANNELS 1024
 
-static long event_channel_open(u16 target_dom)
+static int get_free_port(struct task_struct *p)
 {
-    struct task_struct *lp = current, *rp;
-    int                 i, lmax, rmax, lid, rid;
-    event_channel_t    *lchn, *rchn;
-    shared_info_t      *rsi;
+    int max, port;
+    event_channel_t *chn;
+
+    max = p->max_event_channel;
+    chn = p->event_channel;
+
+    for ( port = 0; port < max; port++ )
+        if ( chn[port].state == ECS_FREE )
+            break;
+
+    if ( port == max )
+    {
+        if ( max == MAX_EVENT_CHANNELS )
+            return -ENOSPC;
+        
+        max = (max == 0) ? 4 : (max * 2);
+        
+        chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL);
+        if ( unlikely(chn == NULL) )
+            return -ENOMEM;
+
+        memset(chn, 0, max * sizeof(event_channel_t));
+
+        if ( p->event_channel != NULL )
+        {
+            memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t));
+            kfree(p->event_channel);
+        }
+
+        p->event_channel     = chn;
+        p->max_event_channel = max;
+    }
+
+    return port;
+}
+
+static inline unsigned long set_event_pending(struct task_struct *p, int port)
+{
+    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_pend[0]) &&
+         !test_and_set_bit(port>>5, &p->shared_info->event_channel_pend_sel) )
+        return mark_guest_event(p, _EVENT_EVTCHN);
+    return 0;
+}
+
+static inline unsigned long set_event_disc(struct task_struct *p, int port)
+{
+    if ( !test_and_set_bit(port,    &p->shared_info->event_channel_disc[0]) &&
+         !test_and_set_bit(port>>5, &p->shared_info->event_channel_disc_sel) )
+        return mark_guest_event(p, _EVENT_EVTCHN);
+    return 0;
+}
+
+static long event_channel_open(evtchn_open_t *open)
+{
+    struct task_struct *lp, *rp;
+    int                 lport = 0, rport = 0;
     unsigned long       cpu_mask;
+    domid_t             ldom = open->local_dom, rdom = open->remote_dom;
     long                rc = 0;
 
-    rp = find_domain_by_id(target_dom);
+    if ( !IS_PRIV(current) )
+        return -EPERM;
+
+    /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be. */
+    if ( ldom == DOMID_SELF )
+        ldom = current->domain;
 
-    /*
-     * We need locks at both ends to make a connection. We avoid deadlock
-     * by acquiring the locks in address order.
-     */
-    if ( (unsigned long)lp < (unsigned long)rp )
+    /* Event channel must connect distinct domains. */
+    if ( ldom == rdom )
+        return -EINVAL;
+
+    if ( ((lp = find_domain_by_id(ldom)) == NULL) ||
+         ((rp = find_domain_by_id(rdom)) == NULL) )
+    {
+        if ( lp != NULL )
+            put_task_struct(lp);
+        return -ESRCH;
+    }
+
+    /* Avoid deadlock by first acquiring lock of domain with smaller id. */
+    if ( ldom < rdom )
     {
         spin_lock(&lp->event_channel_lock);
         spin_lock(&rp->event_channel_lock);
     }
     else
     {
-        if ( likely(rp != NULL) )
-            spin_lock(&rp->event_channel_lock);
+        spin_lock(&rp->event_channel_lock);
         spin_lock(&lp->event_channel_lock);
     }
 
-    lmax = lp->max_event_channel;
-    lchn = lp->event_channel;
-    lid  = -1;
-
-    /*
-     * Find the first unused event channel. Also ensure bo channel already
-     * exists to the specified target domain.
-     */
-    for ( i = 0; i < lmax; i++ )
+    if ( (lport = get_free_port(lp)) < 0 )
     {
-        if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) )
-        {
-            lid = i;
-        }
-        else if ( unlikely(lchn[i].target_dom == target_dom) )
-        {
-            rc = -EEXIST;
-            goto out;
-        }
+        rc = lport;
+        goto out;
     }
-    
-    /* If there is no free slot we need to allocate a bigger channel list. */
-    if ( unlikely(lid == -1) )
-    {
-        /* Reached maximum channel count? */
-        if ( unlikely(lmax == MAX_EVENT_CHANNELS) )
-        {
-            rc = -ENOSPC;
-            goto out;
-        }
-        
-        lmax = (lmax == 0) ? 4 : (lmax * 2);
-        
-        lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL);
-        if ( unlikely(lchn == NULL) )
-        {
-            rc = -ENOMEM;
-            goto out;
-        }
 
-        memset(lchn, 0, lmax * sizeof(event_channel_t));
-        
-        if ( likely(lp->event_channel != NULL) )
-            kfree(lp->event_channel);
-
-        lp->event_channel     = lchn;
-        lp->max_event_channel = lmax;
+    if ( (rport = get_free_port(rp)) < 0 )
+    {
+        rc = rport;
+        goto out;
     }
 
-    lchn[lid].target_dom = target_dom;
-    lchn[lid].flags      = ECF_INUSE;
+    lp->event_channel[lport].remote_dom  = rp;
+    lp->event_channel[lport].remote_port = (u16)rport;
+    lp->event_channel[lport].state       = ECS_CONNECTED;
 
-    if ( likely(rp != NULL) )
-    {
-        rchn = rp->event_channel;
-        rmax = rp->max_event_channel;
-        
-        for ( rid = 0; rid < rmax; rid++ )
-        {
-            if ( (rchn[rid].target_dom == lp->domain) &&
-                 (rchn[rid].flags & ECF_INUSE) )
-            {
-                /*
-                 * The target was awaiting a connection. We make the connection
-                 * and send a connection-made event to the remote end.
-                 */
-                rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid;
-                lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid;
-
-                rsi = rp->shared_info;
-                if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
-                     !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
-                {
-                    cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
-                    guest_event_notify(cpu_mask);
-                }
-
-                break;
-            }
-        }
-    }
+    rp->event_channel[rport].remote_dom  = lp;
+    rp->event_channel[rport].remote_port = (u16)lport;
+    rp->event_channel[rport].state       = ECS_CONNECTED;
+
+    cpu_mask  = set_event_pending(lp, lport);
+    cpu_mask |= set_event_pending(rp, rport);
+    guest_event_notify(cpu_mask);
     
  out:
     spin_unlock(&lp->event_channel_lock);
-    if ( rp != NULL )
-    {
-        spin_unlock(&rp->event_channel_lock);
-        put_task_struct(rp);
-    }
+    spin_unlock(&rp->event_channel_lock);
+    
+    put_task_struct(lp);
+    put_task_struct(rp);
+
+    open->local_port  = lport;
+    open->remote_port = rport;
 
     return rc;
 }
 
 
-static long event_channel_close(u16 lid)
+static long __event_channel_close(struct task_struct *lp, int lport)
 {
-    struct task_struct *lp = current, *rp = NULL;
+    struct task_struct *rp = NULL;
     event_channel_t    *lchn, *rchn;
-    u16                 rid;
-    shared_info_t      *rsi;
+    int                 rport;
     unsigned long       cpu_mask;
     long                rc = 0;
 
@@ -159,21 +171,21 @@ static long event_channel_close(u16 lid)
 
     lchn = lp->event_channel;
 
-    if ( unlikely(lid >= lp->max_event_channel) || 
-         unlikely(!(lchn[lid].flags & ECF_INUSE)) )
+    if ( (lport < 0) || (lport >= lp->max_event_channel) || 
+         (lchn[lport].state == ECS_FREE) )
     {
         rc = -EINVAL;
         goto out;
     }
 
-    if ( lchn[lid].flags & ECF_CONNECTED )
+    if ( lchn[lport].state == ECS_CONNECTED )
     {
         if ( rp == NULL )
         {
-            rp = find_domain_by_id(lchn[lid].target_dom);
-            ASSERT(rp != NULL);
-            
-            if ( (unsigned long)lp < (unsigned long)rp )
+            rp = lchn[lport].remote_dom;
+            get_task_struct(rp);
+
+            if ( lp->domain < rp->domain )
             {
                 spin_lock(&rp->event_channel_lock);
             }
@@ -184,34 +196,39 @@ static long event_channel_close(u16 lid)
                 goto again;
             }
         }
-        else if ( rp->domain != lchn[lid].target_dom )
+        else if ( rp != lchn[lport].remote_dom )
         {
             rc = -EINVAL;
             goto out;
         }
         
-        rchn = rp->event_channel;
-        rid  = lchn[lid].flags & ECF_TARGET_ID;
-        ASSERT(rid < rp->max_event_channel);
-        ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid));
-        ASSERT(rchn[rid].target_dom == lp->domain);
-
-        rchn[rid].flags = ECF_INUSE;
-
-        rsi = rp->shared_info;
-        if ( !test_and_set_bit(rid,    &rsi->event_channel_disc[0]) &&
-             !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) )
-        {
-            cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
-            guest_event_notify(cpu_mask);
-        }
+        rchn  = rp->event_channel;
+        rport = lchn[lport].remote_port;
+
+        if ( rport >= rp->max_event_channel )
+            BUG();
+        if ( rchn[rport].state != ECS_CONNECTED )
+            BUG();
+        if ( rchn[rport].remote_dom != lp )
+            BUG();
+
+        rchn[rport].state       = ECS_ZOMBIE;
+        rchn[rport].remote_dom  = NULL;
+        rchn[rport].remote_port = 0xFFFF;
+
+        cpu_mask  = set_event_disc(lp, lport);
+        cpu_mask |= set_event_disc(rp, rport);
+        guest_event_notify(cpu_mask);
     }
 
-    lchn[lid].target_dom = 0;
-    lchn[lid].flags      = 0;
+    lchn[lport].state       = ECS_FREE;
+    lchn[lport].remote_dom  = NULL;
+    lchn[lport].remote_port = 0xFFFF;
     
  out:
     spin_unlock(&lp->event_channel_lock);
+    put_task_struct(lp);
+
     if ( rp != NULL )
     {
         spin_unlock(&rp->event_channel_lock);
@@ -222,87 +239,135 @@ static long event_channel_close(u16 lid)
 }
 
 
-static long event_channel_send(u16 lid)
+static long event_channel_close(evtchn_close_t *close)
+{
+    struct task_struct *lp;
+    int                 lport = close->local_port;
+    long                rc;
+    domid_t             ldom = close->local_dom;
+
+    if ( ldom == DOMID_SELF )
+        ldom = current->domain;
+    else if ( !IS_PRIV(current) )
+        return -EPERM;
+
+    if ( (lp = find_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
+
+    rc = __event_channel_close(lp, lport);
+
+    put_task_struct(lp);
+    return rc;
+}
+
+
+static long event_channel_send(int lport)
 {
     struct task_struct *lp = current, *rp;
-    u16                 rid, rdom;
-    shared_info_t      *rsi;
+    int                 rport;
     unsigned long       cpu_mask;
 
     spin_lock(&lp->event_channel_lock);
 
-    if ( unlikely(lid >= lp->max_event_channel) || 
-         unlikely(!(lp->event_channel[lid].flags & ECF_CONNECTED)) )
+    if ( unlikely(lport < 0) ||
+         unlikely(lport >= lp->max_event_channel) || 
+         unlikely(lp->event_channel[lport].state != ECS_CONNECTED) )
     {
         spin_unlock(&lp->event_channel_lock);
         return -EINVAL;
     }
 
-    rdom = lp->event_channel[lid].target_dom;
-    rid  = lp->event_channel[lid].flags & ECF_TARGET_ID;
+    rp    = lp->event_channel[lport].remote_dom;
+    rport = lp->event_channel[lport].remote_port;
 
-    spin_unlock(&lp->event_channel_lock);
+    get_task_struct(rp);
 
-    if ( unlikely(rid >= MAX_EVENT_CHANNELS) || 
-         unlikely ((rp = find_domain_by_id(rdom)) == NULL) )
-        return -EINVAL;
+    spin_unlock(&lp->event_channel_lock);
 
-    rsi = rp->shared_info;
-    if ( !test_and_set_bit(rid,    &rsi->event_channel_pend[0]) &&
-         !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) )
-    {
-        cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN);
-        guest_event_notify(cpu_mask);
-    }
+    cpu_mask = set_event_pending(rp, rport);
+    guest_event_notify(cpu_mask);
 
     put_task_struct(rp);
+
     return 0;
 }
 
 
-static long event_channel_status(u16 lid)
+static long event_channel_status(evtchn_status_t *status)
 {
-    struct task_struct *lp = current;
+    struct task_struct *lp;
+    domid_t             ldom = status->local_dom;
+    int                 lport = status->local_port;
     event_channel_t    *lchn;
-    long                rc = EVTCHNSTAT_closed;
+
+    if ( ldom == DOMID_SELF )
+        ldom = current->domain;
+    else if ( !IS_PRIV(current) )
+        return -EPERM;
+
+    if ( (lp = find_domain_by_id(ldom)) == NULL )
+        return -ESRCH;
 
     spin_lock(&lp->event_channel_lock);
 
     lchn = lp->event_channel;
 
-    if ( lid < lp->max_event_channel )
+    if ( (lport < 0) || (lport >= lp->max_event_channel) )
     {
-        if ( lchn[lid].flags & ECF_CONNECTED )
-            rc = EVTCHNSTAT_connected;        
-        else if ( lchn[lid].flags & ECF_INUSE )
-            rc = EVTCHNSTAT_disconnected;
+        spin_unlock(&lp->event_channel_lock);
+        return -EINVAL;
+    }
+
+    switch ( lchn[lport].state )
+    {
+    case ECS_FREE:
+        status->status = EVTCHNSTAT_closed;
+        break;
+    case ECS_ZOMBIE:
+        status->status = EVTCHNSTAT_disconnected;
+        break;
+    case ECS_CONNECTED:
+        status->status = EVTCHNSTAT_connected;
+        status->remote_dom  = lchn[lport].remote_dom->domain;
+        status->remote_port = lchn[lport].remote_port;
+        break;
+    default:
+        BUG();
     }
 
     spin_unlock(&lp->event_channel_lock);
-    return rc;
+    return 0;
 }
 
 
-long do_event_channel_op(unsigned int cmd, unsigned int id)
+long do_event_channel_op(evtchn_op_t *uop)
 {
     long rc;
+    evtchn_op_t op;
+
+    if ( copy_from_user(&op, uop, sizeof(op)) != 0 )
+        return -EFAULT;
 
-    switch ( cmd )
+    switch ( op.cmd )
     {
     case EVTCHNOP_open:
-        rc = event_channel_open((u16)id);
+        rc = event_channel_open(&op.u.open);
+        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
+            rc = -EFAULT; /* Cleaning up here would be a mess! */
         break;
 
     case EVTCHNOP_close:
-        rc = event_channel_close((u16)id);
+        rc = event_channel_close(&op.u.close);
         break;
 
     case EVTCHNOP_send:
-        rc = event_channel_send((u16)id);
+        rc = event_channel_send(op.u.send.local_port);
         break;
 
     case EVTCHNOP_status:
-        rc = event_channel_status((u16)id);
+        rc = event_channel_status(&op.u.status);
+        if ( copy_to_user(uop, &op, sizeof(op)) != 0 )
+            rc = -EFAULT;
         break;
 
     default:
@@ -320,7 +385,7 @@ void destroy_event_channels(struct task_struct *p)
     if ( p->event_channel != NULL )
     {
         for ( i = 0; i < p->max_event_channel; i++ )
-            (void)event_channel_close((u16)i);
+            (void)__event_channel_close(p, i);
         kfree(p->event_channel);
     }
 }
diff --git a/xen/include/hypervisor-ifs/event_channel.h b/xen/include/hypervisor-ifs/event_channel.h
new file mode 100644 (file)
index 0000000..e7c3aa7
--- /dev/null
@@ -0,0 +1,93 @@
+/******************************************************************************
+ * event_channel.h
+ * 
+ * Event channels between domains.
+ * 
+ * Copyright (c) 2003-2004, K A Fraser.
+ */
+
+#ifndef __HYPERVISOR_IFS__EVENT_CHANNEL_H__
+#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
+
+/*
+ * EVTCHNOP_open: Open a communication channel between <local_dom> and
+ * <remote_dom>.
+ * NOTES:
+ *  1. <local_dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may create an event channel.
+ *  3. <local_port> and <remote_port> are only supplied if the op succeeds.
+ */
+#define EVTCHNOP_open           0
+typedef struct evtchn_open
+{
+    /* IN parameters. */
+    domid_t local_dom, remote_dom;
+    /* OUT parameters. */
+    int     local_port, remote_port;
+} evtchn_open_t;
+
+/*
+ * EVTCHNOP_close: Close the communication channel which has an endpoint at
+ * <local_dom, local_port>.
+ * NOTES:
+ *  1. <local_dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may close an event channel
+ *     for which <local_dom> is not DOMID_SELF.
+ */
+#define EVTCHNOP_close          1
+typedef struct evtchn_close
+{
+    /* IN parameters. */
+    domid_t local_dom;
+    int     local_port;
+    /* No OUT parameters. */
+} evtchn_close_t;
+
+/*
+ * EVTCHNOP_send: Send an event to the remote end of the channel whose local
+ * endpoint is <DOMID_SELF, local_port>.
+ */
+#define EVTCHNOP_send           2
+typedef struct evtchn_send
+{
+    /* IN parameters. */
+    int     local_port;
+    /* No OUT parameters. */
+} evtchn_send_t;
+
+/*
+ * EVTCHNOP_status: Get the current status of the communication channel which
+ * has an endpoint at <local_dom, local_port>.
+ * NOTES:
+ *  1. <local_dom> may be specified as DOMID_SELF.
+ *  2. Only a sufficiently-privileged domain may obtain the status of an event
+ *     channel for which <local_dom> is not DOMID_SELF.
+ *  3. <remote_dom, remote_port> is only supplied if status is 'connected'.
+ */
+#define EVTCHNOP_status         3  /* Get status of <local_dom, local_port>. */
+typedef struct evtchn_status
+{
+    /* IN parameters */
+    domid_t local_dom;
+    int     local_port;
+    /* OUT parameters */
+    domid_t remote_dom;
+    int     remote_port;
+#define EVTCHNSTAT_closed       0  /* Channel is not in use.              */
+#define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
+#define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */
+    int     status;
+} evtchn_status_t;
+
+typedef struct evtchn_op
+{
+    int cmd; /* EVTCHNOP_* */
+    union {
+        evtchn_open_t   open;
+        evtchn_close_t  close;
+        evtchn_send_t   send;
+        evtchn_status_t status;
+    } u;
+} evtchn_op_t;
+
+#endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */
index 9ee56c04d02f2bb3b48795c17e996747aae16b25..6001bbcdf748b74f55befc4f5c1cd1a9df90a9a2 100644 (file)
 #define SCHEDOP_exit            3   /* Exit and kill this domain.        */
 #define SCHEDOP_stop            4   /* Stop executing this domain.       */
 
-/*
- * EVTCHNOP_* - Event channel operations.
- */
-#define EVTCHNOP_open           0  /* Open channel to <target domain>.    */
-#define EVTCHNOP_close          1  /* Close <channel id>.                 */
-#define EVTCHNOP_send           2  /* Send event on <channel id>.         */
-#define EVTCHNOP_status         3  /* Get status of <channel id>.         */
-
-/*
- * EVTCHNSTAT_* - Non-error return values from EVTCHNOP_status.
- */
-#define EVTCHNSTAT_closed       0  /* Chennel is not in use.              */
-#define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
-#define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */
-
 
 #ifndef __ASSEMBLY__
 
index 4375d9a7be28283d111c6117bb9ea636e361e1b1..d5973f3f8256827bf0c8616756eaeeb2c0736f2b 100644 (file)
@@ -45,13 +45,16 @@ extern struct mm_struct init_mm;
 
 #define IS_PRIV(_p) (test_bit(PF_PRIVILEGED, &(_p)->flags))
 
+struct task_struct;
+
 typedef struct event_channel_st
 {
-    u16 target_dom; /* Target domain (i.e. domain at remote end). */
-#define ECF_TARGET_ID ((1<<10)-1) /* Channel identifier at remote end.    */
-#define ECF_INUSE     (1<<10)     /* Is this channel descriptor in use?   */
-#define ECF_CONNECTED (1<<11)     /* Is this channel connected to remote? */
-    u16 flags;
+    struct task_struct *remote_dom;
+    u16                 remote_port;
+#define ECS_FREE      0 /* Available for use.                            */
+#define ECS_ZOMBIE    1 /* Connection is closed. Remote is disconnected. */
+#define ECS_CONNECTED 2 /* Connected to remote end.                      */
+    u16                 state;
 } event_channel_t;
 
 struct task_struct